home
***
CD-ROM
|
disk
|
FTP
|
other
***
search
/
Language/OS - Multiplatform Resource Library
/
LANGUAGE OS.iso
/
cpp_libs
/
awe2-0_1.lha
/
awe2-0.1
/
Src
/
RCS
/
SpinLock-umax.s,v
< prev
next >
Wrap
Text File
|
1989-05-17
|
4KB
|
190 lines
head 3.2;
branch ;
access ;
symbols ;
locks grunwald:3.2; strict;
comment @# @;
3.2
date 89.02.20.15.37.41; author grunwald; state Exp;
branches ;
next 3.1;
3.1
date 88.12.20.13.50.34; author grunwald; state Exp;
branches ;
next 1.3;
1.3
date 88.10.30.13.03.43; author grunwald; state Exp;
branches ;
next 1.2;
1.2
date 88.09.21.20.53.29; author grunwald; state Exp;
branches ;
next 1.1;
1.1
date 88.09.18.16.42.31; author grunwald; state Exp;
branches ;
next ;
desc
@@
3.2
log
@Start using Gnu library heaps for schedulers
@
text
@ #
# void
# SpinLock::reserve()
# Acquire this lock (4(sp) = this): spin until the interlocked test-and-set on the lock byte succeeds, then bump the hold count at this+4.
#ifdef __Cplusplus__
.globl __SpinLock_reserve
__SpinLock_reserve:
#endif /* __Cplusplus__ */
#ifndef BACK_SPIN_LOCK
movd 4(sp),r0 # r0 = &this->state.
.L1: sbitib $0,0(r0) # Try: interlocked set of bit 0 of the lock byte.
bfs .L2 # Bit was already set - lock held; go spin.
addqd $1,4(r0) # Won it: bump count at this+4.
ret $0 # Return.
.L2: cmpqb $0,0(r0) # Poll without interlock: lock byte free?
beq .L1 # Yes - retry the interlocked set.
br .L2 # No - keep polling.
#else
#
# This is another version of the spinlock code that attempts to have
# each waiting processor ``back off'' a random amount of time before
# acquiring the lock.
#
# Before the lock acquisition is re-attempted, each processor checks
# to see if the lock has been acquired by someone else.
#
# See ``The Performance Implications of Thread Management
# Alternatives for Shared-Memory Multiprocessors''
# By Anderson, Lazowska and Levy, U Wash. Tech. Rep. 88-09-04
#
# The intent is to not have everyone all of a sudden attempt to
# acquire the lock, since this will cause a flurry of bus traffic
# as each does their test-and-set operation on the lock.
#
# However, tests show that it's slower on the Encore. I don't
# know why. Maybe it's because I don't have enough CPUs to cause
# much bus traffic.
#
movd 4(sp),r0 # r0 = &this->state.
.L1: sbitib $0,0(r0) # Try: interlocked set of the lock bit.
bfs .L2 # Held - go back off and spin.
addqd $1,4(r0) # Won it: bump count at this+4.
ret $0 # Return.
#
# Called from bottom. Odd control flow saves a branch.
#
.L6: movd 4(sp),r0 # Reload this (r0 was clobbered as the delay counter).
cmpqb $0,0(r0) # Free?
beq .L1 # Yes - retry the interlocked set.
.L2: movd r1,tos # Save r1 so it can be used as a busy-poll counter.
movqd $0,r1 # r1 = number of polls seen while the lock was busy.
.L3: cmpqb $0,0(r0) # Free?
beq .L4 # Yes - delay proportionally, then retry.
addqd $1,r1 # Still busy: count this poll.
br .L3 # Poll again.
.L4: movd r1,r0 # r0 = busy-poll count.
movd tos,r1 # Restore caller's r1.
andd $31,r0 # Clamp backoff delay to 0..31 iterations.
.L5: cmpqd $0,r0 # if delay counter is zero,
beq .L6 # then possibly retry the acquisition
addqd $-1,r0 # else delay before acquiring to
br .L5 # reduce bus contention
#endif
# NOTE(review): labels .L3/.L4 below are also defined in the BACK_SPIN_LOCK branch of SpinLock::reserve above; if BACK_SPIN_LOCK is ever defined this file would contain duplicate labels - confirm before enabling that path.
# void
# SpinFetchAndOp::add()
# 0(sp) = return address
# 4(sp) = this
# 8(sp) = value to add
#ifdef __Cplusplus__
.globl __SpinFetchAndOp_add
__SpinFetchAndOp_add:
#endif /* __Cplusplus__ */
movd 4(sp),r0 # r0 = &this->state.
.L3: sbitib $0,0(r0) # Try: interlocked set of the reservation bit.
bfs .L4 # Held - spin until released.
addqd $1,4(r0) # Bump count at this+4.
movd 8(r0),tos # Push the pre-add counter value (this+8).
addd 12(sp),8(r0) # counter += value; arg was at 8(sp), now 12(sp) after the push.
movqd $0,0(r0) # Clear the reservation word, releasing the lock.
movd tos,r0 # Pop old counter into r0 - NOTE(review): looks like fetch-and-add returning the old value despite the `void` header; confirm against the C++ declaration.
ret $0 # Return.
.L4: cmpqb $0,0(r0) # Poll without interlock: free?
beq .L3 # Yes - retry the interlocked set.
br .L4 # No - keep polling.
@
3.1
log
@Steady version
@
text
@@
1.3
log
@*** empty log message ***
@
text
@@
1.2
log
@*** empty log message ***
@
text
@d3 1
a3 1
# HardSpinLock::reserve()
d5 4
a8 4
#ifdef __GCC__
.globl __HardSpinLock_reserve
__HardSpinLock_reserve:
#endif /* __GCC__ */
d10 1
d19 1
d21 46
d68 1
a68 1
# HardFetchAndOp::add()
d72 4
a75 4
#ifdef __GCC__
.globl __HardFetchAndOp_add
__HardFetchAndOp_add:
#endif /* __GCC__ */
@
1.1
log
@Initial revision
@
text
@d3 1
a3 1
# MuxLock::reserve()
d18 23
@